Deep Learning Bootcamp November 2017, GPU Computing for Data Scientists

07 PyTorch two layer nn

Web: https://www.meetup.com/Tel-Aviv-Deep-Learning-Bootcamp/events/241762893/

Notebooks: On GitHub

Shlomo Kashani

PyTorch Imports


In [1]:
# !pip install pycuda
%reset -f
import numpy
import numpy as np

# imports
import numpy as np                     # numeric python lib
import matplotlib.image as mpimg       # reading images to numpy arrays
import matplotlib.pyplot as plt        # to plot any graph
import matplotlib.patches as mpatches  # to draw a circle at the mean contour
import scipy.ndimage as ndi            # to determine shape centrality
# matplotlib setup
%matplotlib inline
from pylab import rcParams
rcParams['figure.figsize'] = (6, 6)      # setting default size of plots

import tensorflow as tf 
print("tensorflow:" + tf.__version__)
!set "KERAS_BACKEND=tensorflow"

import numpy as np
import matplotlib.pyplot as plt

import cv2
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image

from PIL import Image, ImageFilter
from matplotlib.pyplot import imshow
import numpy as np
from PIL import Image
import matplotlib.pyplot as plot   # Library to plot
import matplotlib.cm as colormap   # Library to plot
import PIL
from PIL import Image as PILImage
import time
%matplotlib inline


import torch
import sys
print('__Python VERSION:', sys.version)
print('__pyTorch VERSION:', torch.__version__)
print('__CUDA VERSION')
from subprocess import call
# call(["nvcc", "--version"]) does not work
! nvcc --version
print('__CUDNN VERSION:', torch.backends.cudnn.version())
print('__Number CUDA Devices:', torch.cuda.device_count())
print('__Devices')
call(["nvidia-smi", "--format=csv", "--query-gpu=index,name,driver_version,memory.total,memory.used,memory.free"])
print('Active CUDA Device: GPU', torch.cuda.current_device())

print ('Available devices ', torch.cuda.device_count())
print ('Current cuda device ', torch.cuda.current_device())

from __future__ import print_function
import torch
x=torch.Tensor(3,2)
print (type(x))
print (x)
torch.from_numpy (np.zeros((3,4))).cuda()


/usr/lib/python2.7/dist-packages/matplotlib/font_manager.py:273: UserWarning: Matplotlib is building the font cache using fc-list. This may take a moment.
  warnings.warn('Matplotlib is building the font cache using fc-list. This may take a moment.')
tensorflow:1.2.1
('__Python VERSION:', '2.7.12 (default, Nov 19 2016, 06:48:10) \n[GCC 5.4.0 20160609]')
('__pyTorch VERSION:', '0.1.12+4eb448a')
__CUDA VERSION
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2016 NVIDIA Corporation
Built on Tue_Jan_10_13:22:03_CST_2017
Cuda compilation tools, release 8.0, V8.0.61
('__CUDNN VERSION:', 5110)
('__Number CUDA Devices:', 1L)
__Devices
('Active CUDA Device: GPU', 0L)
('Available devices ', 1L)
('Current cuda device ', 0L)
<class 'torch.FloatTensor'>

1.00000e-36 *
  1.6793  0.0000
  1.7689  0.0000
  0.0000  0.0000
[torch.FloatTensor of size 3x2]

Out[1]:
 0  0  0  0
 0  0  0  0
 0  0  0  0
[torch.cuda.DoubleTensor of size 3x4 (GPU 0)]

Two Layer NN using PyTorch NN module


In [9]:
# https://github.com/mkrphys/ipython-tikzmagic
# ! pip install git+git://github.com/mkrphys/ipython-tikzmagic.git
# ! apt-get install  -qyy imagemagick  pdf2svg ghostscript
# sudo apt-get install texlive-full

In [1]:
# # test

# %load_ext tikzmagic
# %reload_ext tikzmagic
# %tikz \draw (0,0) rectangle (1,3);

In [8]:
import numpy as np

import torch
from torch.autograd import Variable
from torch import optim

dtype = torch.cuda.FloatTensor 

from data_util import load_mnist


def build_model(input_dim, output_dim):
    model = torch.nn.Sequential().cuda()
#     model = torch.nn.DataParallel(model, device_ids=[0]).cuda()
    model.add_module("linear_1", torch.nn.Linear(input_dim, 512, bias=False))
    model.add_module("relu_1", torch.nn.ReLU())
    model.add_module("dropout_1", torch.nn.Dropout(0.2))
    model.add_module("linear_2", torch.nn.Linear(512, 512, bias=False))
    model.add_module("relu_2", torch.nn.ReLU())
    model.add_module("dropout_2", torch.nn.Dropout(0.2))
    model.add_module("linear_3", torch.nn.Linear(512, output_dim, bias=False))
    return model


def train(model, loss, optimizer, x_val, y_val):
    x = Variable(x_val, requires_grad=False)
    y = Variable(y_val, requires_grad=False)

    # Reset gradient
    optimizer.zero_grad()

    # Forward
    fx = model.forward(x)
    output = loss.forward(fx, y)

    # Backward
    output.backward()

    # Update parameters
    optimizer.step()

    return output.data[0]


def predict(model, x_val):
    x = Variable(x_val, requires_grad=False)
    output = model.forward(x)
    return output.data.numpy().argmax(axis=1)


def main():
    torch.manual_seed(42)
    trX, teX, trY, teY = load_mnist(onehot=False)
    trX = torch.from_numpy(trX).float()
    teX = torch.from_numpy(teX).float()
    trY = torch.from_numpy(trY).long()

    n_examples, n_features = trX.size()
    n_classes = 10
    model = build_model(n_features, n_classes)
    loss = torch.nn.CrossEntropyLoss(size_average=True)
    optimizer = optim.Adam(model.parameters())
    batch_size = 100

    for i in range(30):
        cost = 0.
        num_batches = n_examples // batch_size
        for k in range(num_batches):
            start, end = k * batch_size, (k + 1) * batch_size
            cost += train(model, loss, optimizer, trX[start:end], trY[start:end])
        predY = predict(model, teX)
        print("Epoch %d, cost = %f, acc = %.2f%%"
              % (i + 1, cost / num_batches, 100. * np.mean(predY == teY)))


if __name__ == "__main__":
    main()


Epoch 1, cost = 0.285651, acc = 95.48%
Epoch 2, cost = 0.114465, acc = 96.28%
Epoch 3, cost = 0.081446, acc = 96.02%
Epoch 4, cost = 0.063199, acc = 96.85%
Epoch 5, cost = 0.049565, acc = 97.37%
Epoch 6, cost = 0.043171, acc = 97.38%
Epoch 7, cost = 0.037471, acc = 96.79%
Epoch 8, cost = 0.032391, acc = 97.54%
Epoch 9, cost = 0.027450, acc = 97.15%
Epoch 10, cost = 0.030221, acc = 97.77%
Epoch 11, cost = 0.025462, acc = 97.47%
Epoch 12, cost = 0.025810, acc = 97.93%
Epoch 13, cost = 0.021117, acc = 97.48%
Epoch 14, cost = 0.020988, acc = 97.67%
Epoch 15, cost = 0.020017, acc = 97.58%
Epoch 16, cost = 0.020140, acc = 97.80%
Epoch 17, cost = 0.018000, acc = 97.85%
Epoch 18, cost = 0.018227, acc = 97.76%
Epoch 19, cost = 0.018760, acc = 97.51%
Epoch 20, cost = 0.018152, acc = 97.92%
Epoch 21, cost = 0.014606, acc = 97.89%
Epoch 22, cost = 0.016066, acc = 97.71%
Epoch 23, cost = 0.014409, acc = 97.86%
Epoch 24, cost = 0.016373, acc = 97.41%
Epoch 25, cost = 0.015187, acc = 97.51%
Epoch 26, cost = 0.014995, acc = 97.81%
Epoch 27, cost = 0.012487, acc = 97.87%
Epoch 28, cost = 0.017068, acc = 98.02%
Epoch 29, cost = 0.012375, acc = 97.84%
Epoch 30, cost = 0.012766, acc = 98.13%

In [ ]: